# Mean accuracy and SD by Category x Task.
# `.groups = "drop_last"` is the summarise() default (result stays grouped
# by Category) but stating it explicitly silences the echoed message.
agr <- d %>%
  group_by(Category, Task) %>%
  summarize(MeanAccuracy = mean(Accuracy),
            SD = sd(Accuracy),
            .groups = "drop_last")
print(agr)
## # A tibble: 7 × 4
## # Groups: Category [3]
## Category Task MeanAccuracy SD
## <chr> <chr> <dbl> <dbl>
## 1 Adjs Concrete 0.751 0.432
## 2 Adjs Valence 0.934 0.248
## 3 Nouns Animacy 0.965 0.183
## 4 Nouns Concrete 0.871 0.335
## 5 Nouns Valence 0.931 0.253
## 6 Verbs Concrete 0.917 0.276
## 7 Verbs Valence 0.956 0.205
# Mean accuracy by Task with bootstrapped CIs (ci.low/ci.high are project
# helpers); reframe() returns an ungrouped tibble, so no .groups needed.
agr <- d %>%
  group_by(Task) %>%
  reframe(MeanAccuracy = mean(Accuracy),
          CILow = ci.low(Accuracy),
          CIHigh = ci.high(Accuracy)) %>%
  mutate(YMin = MeanAccuracy - CILow,
         YMax = MeanAccuracy + CIHigh)
# View(agr)
dodge <- position_dodge(.9)  # `<-` for top-level assignment
ggplot(data = agr, aes(x = Task, y = MeanAccuracy, fill = Task)) +
  geom_bar(position = dodge, stat = "identity") +
  # reuse `dodge` rather than re-creating position_dodge(0.9)
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = .25, position = dodge)
# theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
# guides(fill = "none")
# Mean accuracy by Task x Category with bootstrapped CIs, dodged bars.
agr <- d %>%
  group_by(Task, Category) %>%
  reframe(MeanAccuracy = mean(Accuracy),
          CILow = ci.low(Accuracy),
          CIHigh = ci.high(Accuracy)) %>%
  mutate(YMin = MeanAccuracy - CILow,
         YMax = MeanAccuracy + CIHigh)
# View(agr)
dodge <- position_dodge(.9)  # `<-` for top-level assignment
ggplot(data = agr, aes(x = Task, y = MeanAccuracy, fill = Category)) +
  geom_bar(position = dodge, stat = "identity") +
  # reuse `dodge` rather than re-creating position_dodge(0.9)
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = .25, position = dodge)
# theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
# guides(fill = "none")
# By-word mean accuracy vs. mean RT, faceted by task, labeled by word.
# `.groups = "drop"` returns an ungrouped result and silences the message.
agr <- d %>%
  group_by(Word, Category, Task) %>%
  summarize(MeanAccuracy = mean(Accuracy),
            MeanReactionTime = mean(ReactionTime),
            .groups = "drop")
ggplot(agr, aes(x = MeanReactionTime, y = MeanAccuracy)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE, color = "black") +
  geom_text(aes(label = Word, color = Category), vjust = -0.5, hjust = 1.5) +
  facet_wrap(~Task)
## `geom_smooth()` using formula = 'y ~ x'
# guides(legend = "none")
# theme(legend.position = "none") # Remove the legend
# ggsave("../graphs/exp1b_accXrt.pdf",width = 5, height = 3)
# Per-participant mean accuracy (with CIs) by task, participants ordered
# by accuracy within each facet.
agr <- d %>%
  group_by(ID.true, Task) %>%
  reframe(MeanAccuracy = mean(Accuracy),
          CILow = ci.low(Accuracy),
          CIHigh = ci.high(Accuracy)) %>%
  mutate(YMin = MeanAccuracy - CILow,
         YMax = MeanAccuracy + CIHigh)
dodge <- position_dodge(.9)  # `<-` for top-level assignment
ggplot(data = agr,
       aes(x = reorder(ID.true, MeanAccuracy), y = MeanAccuracy, fill = Task)) +
  geom_bar(position = dodge, stat = "identity") +
  facet_wrap(~Task) +
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = .25, position = dodge) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
# guides(fill = "none")
# By-word accuracy per Task x Category, shown as violins with jittered
# word-level points. YMin/YMax are computed here but not drawn.
agr <- d %>%
  group_by(Word, Task, Category) %>%
  reframe(
    MeanAccuracy = mean(Accuracy),
    CILow = ci.low(Accuracy),
    CIHigh = ci.high(Accuracy)
  ) %>%
  mutate(
    YMin = MeanAccuracy - CILow,
    YMax = MeanAccuracy + CIHigh
  )
ggplot(agr, aes(x = Task, y = MeanAccuracy, fill = Category)) +
  geom_violin(trim = FALSE, alpha = .4) +
  geom_jitter(shape = 16, position = position_jitter(0.2))
# guides(fill = "none")
# Per-participant mean RT (with CIs) by ConcValCombo, faceted by task.
agr <- d %>%
  group_by(ID.true, Task, ConcValCombo) %>%
  summarize(MeanReactionTime = mean(ReactionTime),
            CILow = ci.low(ReactionTime),
            CIHigh = ci.high(ReactionTime),
            .groups = "drop") %>%
  mutate(YMin = MeanReactionTime - CILow,
         YMax = MeanReactionTime + CIHigh)
dodge <- position_dodge(.9)  # `<-` for top-level assignment
ggplot(data = agr,
       aes(x = reorder(ID.true, MeanReactionTime), y = MeanReactionTime,
           fill = ConcValCombo)) +
  geom_bar(position = dodge, stat = "identity") +
  facet_wrap(~Task) +
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = .25, position = dodge) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
# guides(fill = "none")
# Proportion of "concrete" responses by ConcValCombo x Category (Concrete
# task only). Response.n codes "abstract" = 0, "concrete" = 1.
agr <- d %>%
  filter(Task == "Concrete") %>%
  mutate(Response.n = as.numeric(factor(Response, levels = c("abstract", "concrete"))) - 1) %>%
  group_by(ConcValCombo, Category) %>%
  summarize(PropConcrete = mean(Response.n),
            CILow = ci.low(Response.n),
            CIHigh = ci.high(Response.n),
            .groups = "drop") %>%
  mutate(YMin = PropConcrete - CILow,
         YMax = PropConcrete + CIHigh)
dodge <- position_dodge(.9)  # `<-` for top-level assignment
ggplot(data = agr, aes(x = Category, y = PropConcrete, fill = ConcValCombo)) +
  geom_bar(position = dodge, stat = "identity") +
  # facet_wrap(~Version) +
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = .25, position = dodge)
# theme(axis.text.x = element_text(angle = 45, hjust = 1))
# By-word proportion "concrete" vs. mean RT (Concrete task), labeled scatter.
agr <- d %>%
  filter(Task == "Concrete") %>%
  mutate(Response.n = as.numeric(factor(Response, levels = c("abstract", "concrete"))) - 1) %>%
  group_by(Word, ConcValCombo) %>%
  summarize(PropConcrete = mean(Response.n),
            MeanReactionTime = mean(ReactionTime),
            .groups = "drop")
ggplot(agr, aes(x = MeanReactionTime, y = PropConcrete)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE, color = "red") +
  geom_text(aes(label = Word, color = ConcValCombo), vjust = -0.5, hjust = 1.5)
## `geom_smooth()` using formula = 'y ~ x'
# geom_text_repel(aes(label = Word, color = ConcValCombo),
#                 vjust = -0.5, hjust = 1.5) +
# scale_fill_manual(values=cbPalette) +
# scale_color_manual(values=cbPalette)
# By-word proportion "positive" vs. mean RT (Valence task), labeled scatter.
# Response.n codes "negative" = 0, "positive" = 1.
agr <- d %>%
  filter(Task == "Valence") %>%
  mutate(Response.n = as.numeric(factor(Response, levels = c("negative", "positive"))) - 1) %>%
  group_by(Word, ConcValCombo) %>%
  summarize(PropPositive = mean(Response.n),
            MeanReactionTime = mean(ReactionTime),
            .groups = "drop")
ggplot(agr, aes(x = MeanReactionTime, y = PropPositive)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE, color = "red") +
  geom_text(aes(label = Word, color = ConcValCombo), vjust = -0.5, hjust = 1.5)
## `geom_smooth()` using formula = 'y ~ x'
# geom_text_repel(aes(label = Word, color = ConcValCombo),
#                 vjust = -0.5, hjust = 1.5) +
# scale_fill_manual(values=cbPalette) +
# scale_color_manual(values=cbPalette)
# By-word proportion "animate" vs. mean RT (Animacy task), labeled scatter.
# Response.n codes "inanimate" = 0, "animate" = 1.
agr <- d %>%
  filter(Task == "Animacy") %>%
  mutate(Response.n = as.numeric(factor(Response, levels = c("inanimate", "animate"))) - 1) %>%
  group_by(Word, Animacy, Valence) %>%
  summarize(PropAnimate = mean(Response.n),
            MeanReactionTime = mean(ReactionTime),
            .groups = "drop")
ggplot(agr, aes(x = MeanReactionTime, y = PropAnimate)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE, color = "red") +
  geom_text(aes(label = Word, color = Valence), vjust = -0.5, hjust = 1.5)
## `geom_smooth()` using formula = 'y ~ x'
# geom_text_repel(aes(label = Word, color = ConcValCombo),
#                 vjust = -0.5, hjust = 1.5) +
# scale_fill_manual(values=cbPalette) +
# scale_color_manual(values=cbPalette)
# By-word accuracy vs. RT across all tasks, faceted scatter with labels.
agr <- d %>%
  # filter(Task == "Concrete") %>%
  # mutate(Response.n = as.numeric(factor(Response, levels = c("abstract", "concrete"))) - 1) %>%
  group_by(Word, Task, Category) %>%
  summarize(MeanAccuracy = mean(Accuracy),
            MeanReactionTime = mean(ReactionTime),
            .groups = "drop")
ggplot(agr, aes(x = MeanReactionTime, y = MeanAccuracy)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE, color = "black") +
  facet_wrap(~Task) +
  geom_text(aes(label = Word, color = Category), vjust = -0.5, hjust = 1.5) +
  # `legend` is not an aesthetic, so the original guides(legend = "none")
  # was a no-op; suppress the color legend explicitly instead
  guides(color = "none")
## `geom_smooth()` using formula = 'y ~ x'
# theme(
#   legend.position = "top", # Move legend to the top
#   legend.title = element_text(size = 10), # Adjust legend title size
#   legend.text = element_text(size = 9) # Adjust legend text size
# )
# geom_text_repel(aes(label = Word, color = ConcValCombo),
#                 vjust = -0.5, hjust = 1.5) +
# scale_fill_manual(values=cbPalette) +
# scale_color_manual(values=cbPalette)
# ggsave("../graphs/exp3_accXrt.pdf",width = 5, height = 3)
# First remove inaccurate participants
length(unique(d$ID.true))
## [1] 118
# A participant is flagged if ANY of their Task x Category cells falls
# below 75% mean accuracy
inacc.parts <- d %>%
  group_by(ID.true, Task, Category) %>%
  summarise(MeanAccuracy = mean(Accuracy), .groups = "drop") %>%
  filter(MeanAccuracy < .75)
# How many participants have Accuracy < .75?
length(unique(inacc.parts$ID.true))
## [1] 23
# Remove them (anti_join drops every trial from any flagged participant)
d.inaccurate.removed <- d %>%
  anti_join(inacc.parts, by = "ID.true")
# Sanity check
length(unique(d.inaccurate.removed$ID.true))
## [1] 95
# Second, remove all inaccurate trials, then report the percentage of
# trials retained.
orig <- nrow(d.inaccurate.removed)
d.inaccurate.removed <- filter(d.inaccurate.removed, Accuracy == 1)
100 * nrow(d.inaccurate.removed) / orig
## [1] 94.05759
# Third, remove trials whose LogReactionTime falls outside 3 x IQR of the
# quartiles (both tails, despite the original "higher than" wording)
summary(d.inaccurate.removed$LogReactionTime)
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 2.303 6.426 6.608 6.694 6.872 10.619
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 6.924 7.328 7.436 7.479 7.579 10.008
range(d.inaccurate.removed$LogReactionTime)
## [1] 2.302585 10.618714
hist(d.inaccurate.removed$LogReactionTime, breaks = 100, col = "lightblue",
     xlab = "LogReactionTime (ms)", main = "Histogram with Normal Curve")
quantile(d.inaccurate.removed$LogReactionTime, na.rm = TRUE)
## 0% 25% 50% 75% 100%
## 2.302585 6.426488 6.608001 6.872128 10.618714
IQR(d.inaccurate.removed$LogReactionTime, na.rm = TRUE) * 3  # 1.336919
## [1] 1.336919
# quantile()[4] is the 75th percentile, quantile()[2] the 25th
cutoff.high <- quantile(d.inaccurate.removed$LogReactionTime, na.rm = TRUE)[4] +
  IQR(d.inaccurate.removed$LogReactionTime, na.rm = TRUE) * 3
cutoff.low <- quantile(d.inaccurate.removed$LogReactionTime, na.rm = TRUE)[2] -
  IQR(d.inaccurate.removed$LogReactionTime, na.rm = TRUE) * 3
# Keep trials strictly inside (cutoff.low, cutoff.high). subset() evaluates
# the condition inside the data frame, so the repeated
# d.inaccurate.removed$ prefixes were redundant.
df.outliers.removed <- subset(d.inaccurate.removed,
                              LogReactionTime > cutoff.low & LogReactionTime < cutoff.high)
hist(df.outliers.removed$LogReactionTime, breaks = 100, col = "lightblue",
     xlab = "LogReactionTime (ms)", main = "Histogram with Normal Curve")
# Density of log RT (first plot) and raw RT (second) by task, after
# outlier removal.
ggplot(df.outliers.removed, aes(x = LogReactionTime, fill = Task)) +
  geom_density(alpha = .4)
ggplot(df.outliers.removed, aes(x = ReactionTime, fill = Task)) +
  geom_density(alpha = .4)
# RT summary (raw mean, SD, log mean) by Task x Category on the
# accurate-trial data. "drop_last" is the default (result stays grouped by
# Task, matching the printed output) but silences the message.
agr <- d.inaccurate.removed %>%
  group_by(Task, Category) %>%
  summarize(MeanRT = mean(ReactionTime),
            SD = sd(ReactionTime),
            MeanLogRT = mean(LogReactionTime),
            .groups = "drop_last")
print(agr)
## # A tibble: 7 × 5
## # Groups: Task [3]
## Task Category MeanRT SD MeanLogRT
## <chr> <chr> <dbl> <dbl> <dbl>
## 1 Animacy Nouns 831. 483. 6.63
## 2 Concrete Adjs 1012. 667. 6.81
## 3 Concrete Nouns 934. 631. 6.74
## 4 Concrete Verbs 1071. 1091. 6.85
## 5 Valence Adjs 780. 350. 6.60
## 6 Valence Nouns 834. 669. 6.63
## 7 Valence Verbs 866. 406. 6.69
# By-word mean log RT (with CIs) per task: density plot, then violins with
# jittered word-level points. YMin/YMax feed the commented-out errorbars.
agr <- df.outliers.removed %>%
  group_by(Task, Word) %>%
  summarize(MeanLogReactionTime = mean(LogReactionTime),
            CILow = ci.low(LogReactionTime),
            CIHigh = ci.high(LogReactionTime),
            .groups = "drop") %>%
  mutate(YMin = MeanLogReactionTime - CILow,
         YMax = MeanLogReactionTime + CIHigh)
ggplot(agr, aes(x = MeanLogReactionTime, fill = Task)) +
  geom_density(alpha = .4)
ggplot(agr, aes(x = Task, y = MeanLogReactionTime, fill = Task)) +
  geom_violin(trim = FALSE, alpha = .4) +
  geom_jitter(shape = 16, position = position_jitter(0.2)) +
  # geom_errorbar(aes(ymin=YMin,ymax=YMax),width=.25,position="dodge", show.legend = FALSE) +
  # theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  guides(fill = "none")
# Same as the log-RT block above, but on raw ReactionTime.
agr <- df.outliers.removed %>%
  group_by(Task, Word) %>%
  summarize(MeanReactionTime = mean(ReactionTime),
            CILow = ci.low(ReactionTime),
            CIHigh = ci.high(ReactionTime),
            .groups = "drop") %>%
  mutate(YMin = MeanReactionTime - CILow,
         YMax = MeanReactionTime + CIHigh)
ggplot(agr, aes(x = MeanReactionTime, fill = Task)) +
  geom_density(alpha = .4)
ggplot(agr, aes(x = Task, y = MeanReactionTime, fill = Task)) +
  geom_violin(trim = FALSE, alpha = .4) +
  geom_jitter(shape = 16, position = position_jitter(0.2)) +
  # geom_errorbar(aes(ymin=YMin,ymax=YMax),width=.25,position="dodge", show.legend = FALSE) +
  # theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  guides(fill = "none")
# ggsave("../graphs/total_rt_violin.pdf",width = 4, height = 3)
# Proportion "concrete" per word in the FULL dataset (prop). NOTE(review):
# `prop` is computed but not used below -- kept for reference.
prop <- d %>%
  filter(Task == "Concrete") %>%
  mutate(Response.n = as.numeric(factor(Response, levels = c("abstract", "concrete"))) - 1) %>%
  group_by(Word) %>%
  summarize(PropConcrete = mean(Response.n))
# Same by-word proportions (plus mean RT) on the outlier-cleaned data.
agr <- df.outliers.removed %>%
  filter(Task == "Concrete") %>%
  mutate(Response.n = as.numeric(factor(Response, levels = c("abstract", "concrete"))) - 1) %>%
  group_by(Word, ConcValCombo) %>%
  summarize(PropConcrete = mean(Response.n),
            MeanReactionTime = mean(ReactionTime),
            .groups = "drop")
ggplot(agr, aes(x = MeanReactionTime, y = PropConcrete)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE, color = "red") +
  geom_text(aes(label = Word, color = ConcValCombo), vjust = -0.5, hjust = 1.5)
## `geom_smooth()` using formula = 'y ~ x'
# geom_text_repel(aes(label = Word, color = ConcValCombo),
#                 vjust = -0.5, hjust = 1.5) +
# scale_fill_manual(values=cbPalette) +
# scale_color_manual(values=cbPalette)
Yes — it looks like choosing "negative" is faster than choosing "positive".
Just barely: choosing "abstract" has a negative (slowing) effect on RT.
Nope.
# Mean RT (with CIs) by Task x Category as dodged bars, then by-word RT
# distributions as violins.
agr <- df.outliers.removed %>%
  group_by(Task, Category) %>%
  reframe(MeanReactionTime = mean(ReactionTime),
          CILow = ci.low(ReactionTime),
          CIHigh = ci.high(ReactionTime)) %>%
  mutate(YMin = MeanReactionTime - CILow,
         YMax = MeanReactionTime + CIHigh)
dodge <- position_dodge(.9)  # `<-` for top-level assignment
ggplot(data = agr,
       aes(x = reorder(Category, MeanReactionTime), y = MeanReactionTime,
           fill = Task)) +
  geom_bar(position = dodge, stat = "identity") +
  # facet_wrap(~Task) +
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = .25, position = dodge) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
# guides(fill = "none")
agr <- df.outliers.removed %>%
  group_by(Word, Task, Category) %>%
  reframe(MeanReactionTime = mean(ReactionTime),
          CILow = ci.low(ReactionTime),
          CIHigh = ci.high(ReactionTime)) %>%
  mutate(YMin = MeanReactionTime - CILow,
         YMax = MeanReactionTime + CIHigh)
ggplot(agr, aes(x = Category, y = MeanReactionTime, fill = Task)) +
  geom_violin(trim = FALSE, alpha = .4) +
  geom_jitter(shape = 16, position = position_jitter(0.2)) +
  guides(fill = "none")
# Flag words whose Concrete (or Animacy) task was answered at least as fast
# as the Valence task, on average.
test <- df.outliers.removed %>%
# filter(Task %in% c("Concrete", "Valence")) %>% # Keep only relevant tasks
group_by(Word, Task) %>%
summarise(
RT = mean(ReactionTime, na.rm = TRUE), # Take the mean RT if duplicates exist
.groups = "drop_last" # Drop grouping by Task but keep Word (ID.true is not in the grouping)
) %>%
# Wide: one row per word, one RT_<Task> column per task. Tasks a word never
# appeared in become NA (e.g. RT_Animacy for words not in the Animacy task).
pivot_wider(names_from = Task, values_from = RT, names_prefix = "RT_") %>% # Reshape to wide format
# Comparisons against NA yield NA and filter() drops NA rows, so a word is
# kept only when at least one comparison evaluates to TRUE.
filter((RT_Concrete <= RT_Valence) | (RT_Animacy <= RT_Valence)) %>% # Apply the condition
# Back to long; NA RT rows reappear for the tasks a kept word lacked (these
# produce the "Removed ... rows" warnings in the plots below).
pivot_longer(
cols = starts_with("RT_"), # Select the reshaped columns
names_to = "Task", # Restore Task column
names_prefix = "RT_", # Remove "RT_" prefix to match original Task names
values_to = "ReactionTime" # Column for the RT values
) %>%
ungroup()
# Fraction of the cleaned data these flagged rows represent
nrow(test)/nrow(df.outliers.removed)
## [1] 0.005041311
# Per-word mean RT (with CIs) for the flagged words, one facet per word.
# NA ReactionTime rows from the pivot produce the "Removed ... rows"
# warning below.
agr <- test %>%
  group_by(Word, Task) %>%
  summarize(MeanReactionTime = mean(ReactionTime),
            CILow = ci.low(ReactionTime),
            CIHigh = ci.high(ReactionTime),
            .groups = "drop") %>%
  mutate(YMin = MeanReactionTime - CILow,
         YMax = MeanReactionTime + CIHigh)
dodge <- position_dodge(.9)  # `<-` for top-level assignment
ggplot(data = agr, aes(x = Task, y = MeanReactionTime, fill = Task)) +
  geom_bar(position = dodge, stat = "identity") +
  facet_wrap(~Word) +
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = .25, position = dodge) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
## Warning: Removed 29 rows containing missing values or values outside the scale range
## (`geom_bar()`).
# guides(fill = "none")
# Flagged words on the x axis, RT bars dodged by task, with alpha also
# distinguishing Task (intentional; ggplot warns about discrete alpha).
ggplot(agr, aes(x = Word, y = MeanReactionTime, alpha = Task, fill = Task)) +
  geom_bar(position = "dodge", stat = "identity") +
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = .25, position = position_dodge(0.9)) +
  # facet_wrap(~Task, ncol=10) +
  # Axis labels corrected: x maps Word and y maps MeanReactionTime (the old
  # labels "ConcValCombo"/"MeanAccuracy" did not match the aesthetics)
  xlab("Word") +
  ylab("MeanReactionTime") +
  # guides(fill = FALSE) is deprecated since ggplot2 3.3.4; use "none"
  guides(fill = "none") +
  # guides(alpha=guide_legend(title="Task")) +
  theme(legend.key.size = unit(0.3, "cm"),
        legend.position = "top", # c(.5,1)
        legend.direction = "horizontal",
        legend.margin = margin(0, 0, 0, 0),
        legend.box.margin = margin(0, 0, -5, -5),
        legend.spacing.y = unit(0.001, 'cm')) +
  # scale_fill_manual(values=cbPalette) +
  # scale_color_manual(values=cbPalette) +
  scale_alpha_discrete(range = c(.5, 1)) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
## Warning: The `<scale>` argument of `guides()` cannot be `FALSE`. Use "none" instead as
## of ggplot2 3.3.4.
## This warning is displayed once every 8 hours.
## Call `lifecycle::last_lifecycle_warnings()` to see where this warning was
## generated.
## Warning: Using alpha for a discrete variable is not advised.
## Warning: Removed 29 rows containing missing values or values outside the scale range
## (`geom_bar()`).
# Accuracy (with CIs) for the flagged words, computed on the FULL data,
# one facet per word.
test_avv <- d %>%
  filter(Word %in% test$Word) %>%
  group_by(Word, Task) %>%
  summarize(MeanAccuracy = mean(Accuracy),
            CILow = ci.low(Accuracy),
            CIHigh = ci.high(Accuracy),
            .groups = "drop") %>%
  mutate(YMin = MeanAccuracy - CILow,
         YMax = MeanAccuracy + CIHigh)
# View(test_avv)
dodge <- position_dodge(.9)  # `<-` for top-level assignment
ggplot(data = test_avv, aes(x = Task, y = MeanAccuracy, fill = Task)) +
  geom_bar(position = dodge, stat = "identity") +
  facet_wrap(~Word) +
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = .25, position = dodge) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
# guides(fill = "none")
# Accuracy vs. RT for the flagged words; geom_text_repel (ggrepel) and
# cbPalette are loaded/defined elsewhere in the project.
test_avv <- d %>%
  filter(Word %in% test$Word) %>%
  group_by(Word, Task) %>%
  summarize(MeanAccuracy = mean(Accuracy),
            MeanReactionTime = mean(ReactionTime),
            .groups = "drop")
ggplot(test_avv, aes(x = MeanReactionTime, y = MeanAccuracy)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE, color = "red") +
  # geom_text(aes(label = Word, color = Task), vjust = -0.5, hjust = 1.5)
  geom_text_repel(aes(label = Word, color = Task),
                  vjust = -0.5, hjust = 1.5) +
  scale_fill_manual(values = cbPalette) +
  scale_color_manual(values = cbPalette)
## `geom_smooth()` using formula = 'y ~ x'
## Warning: ggrepel: 36 unlabeled data points (too many overlaps). Consider
## increasing max.overlaps
# Compute highest accuracy for Concrete: top 10 words by mean accuracy in
# the Concrete task. NOTE(review): many words are tied at accuracy 1, so
# which 10 survive head() is arbitrary (tie order).
concrete_accuracy <- d %>%
  group_by(Word, Task) %>%
  summarize(MeanAccuracy = mean(Accuracy),
            MeanReactionTime = mean(ReactionTime),
            .groups = "drop") %>%
  filter(Task == "Concrete") %>%
  select(Word, MeanAccuracy) %>%
  rename(ConcreteAccuracy = MeanAccuracy) %>%
  arrange(desc(ConcreteAccuracy)) %>%
  head(10)
# Accuracy and RT for the top-accuracy words, on the outlier-cleaned data.
# "drop_last" keeps grouping by Word, matching the printed output below.
agr <- df.outliers.removed %>%
  filter(Word %in% concrete_accuracy$Word) %>%
  group_by(Word, Task) %>%
  summarize(MeanAccuracy = mean(Accuracy),
            MeanReactionTime = mean(ReactionTime),
            .groups = "drop_last")
print(agr)
## # A tibble: 20 × 4
## # Groups: Word [10]
## Word Task MeanAccuracy MeanReactionTime
## <chr> <chr> <dbl> <dbl>
## 1 Despairing Concrete 1 929.
## 2 Despairing Valence 1 706.
## 3 Hopeless Concrete 1 882.
## 4 Hopeless Valence 1 722.
## 5 chocolate Concrete 1 747.
## 6 chocolate Valence 1 697.
## 7 despair Concrete 1 818.
## 8 despair Valence 1 746.
## 9 grief Concrete 1 861.
## 10 grief Valence 1 769.
## 11 inspiration Concrete 1 800.
## 12 inspiration Valence 1 782.
## 13 kindness Concrete 1 947.
## 14 kindness Valence 1 635.
## 15 kiss Concrete 1 877.
## 16 kiss Valence 1 831.
## 17 mud Concrete 1 906.
## 18 mud Valence 1 770.
## 19 shame Concrete 1 849.
## 20 shame Valence 1 705.
# Accuracy vs. RT for the top-accuracy words, labeled by word and colored
# by task (accuracy is 1 for all of these words in the printout above, so
# the fitted line is flat).
ggplot(agr, aes(x = MeanReactionTime, y = MeanAccuracy)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE, color = "black") +
  geom_text(aes(label = Word, color = Task), vjust = -0.5, hjust = 1.5)
## `geom_smooth()` using formula = 'y ~ x'
# guides(legend = "none")
# theme(legend.position = "none") # Remove the legend
# ggsave("../graphs/exp1b_accXrt.pdf",width = 5, height = 3)
# RT with CIs for the top-accuracy words. "drop_last" keeps grouping by
# Word, matching the printed output below.
agr <- df.outliers.removed %>%
  filter(Word %in% concrete_accuracy$Word) %>%
  group_by(Word, Task) %>%
  summarize(MeanReactionTime = mean(ReactionTime),
            CILow = ci.low(ReactionTime),
            CIHigh = ci.high(ReactionTime),
            .groups = "drop_last") %>%
  mutate(YMin = MeanReactionTime - CILow,
         YMax = MeanReactionTime + CIHigh)
print(agr)
## # A tibble: 20 × 7
## # Groups: Word [10]
## Word Task MeanReactionTime CILow CIHigh YMin YMax
## <chr> <chr> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 Despairing Concrete 929. 161. 188. 768. 1116.
## 2 Despairing Valence 706. 53.4 54.4 653. 760.
## 3 Hopeless Concrete 882. 140. 162. 742. 1043.
## 4 Hopeless Valence 722. 64.0 62.8 658. 785.
## 5 chocolate Concrete 747. 64.4 67.5 682. 814.
## 6 chocolate Valence 697. 48.2 49.2 649. 747.
## 7 despair Concrete 818. 105. 128. 713. 946.
## 8 despair Valence 746. 67.4 74.4 678. 820.
## 9 grief Concrete 861. 97.5 106. 764. 967.
## 10 grief Valence 769. 89.4 117. 680. 886.
## 11 inspiration Concrete 800. 65.6 72.8 735. 873.
## 12 inspiration Valence 782. 67.9 78.2 714. 860.
## 13 kindness Concrete 947. 126. 160. 821. 1107.
## 14 kindness Valence 635. 32.8 36.8 602. 671.
## 15 kiss Concrete 877. 106. 122. 771. 999.
## 16 kiss Valence 831. 89.9 114. 741. 945.
## 17 mud Concrete 906. 117. 130. 789. 1036.
## 18 mud Valence 770. 88.7 91.4 681. 861.
## 19 shame Concrete 849. 81.1 97.1 768. 946.
## 20 shame Valence 705. 43.9 49.5 661. 755.
# Dodged RT bars per word, colored by task.
dodge <- position_dodge(.9)  # `<-` for top-level assignment
ggplot(data = agr, aes(x = Word, y = MeanReactionTime, fill = Task)) +
  geom_bar(position = dodge, stat = "identity") +
  # facet_wrap(~Word) +
  # reuse `dodge` rather than re-creating position_dodge(0.9)
  geom_errorbar(aes(ymin = YMin, ymax = YMax), width = .25, position = dodge) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
# guides(fill = "none")
# Raw RT distributions for the top-accuracy words: violins split by task,
# with a dodged point marking each distribution's median.
agr <- df.outliers.removed %>%
  filter(Word %in% concrete_accuracy$Word)
ggplot(agr, aes(x = Word, y = ReactionTime, fill = Task)) +
  geom_violin(trim = FALSE, alpha = .4) +
  stat_summary(fun = median, geom = "point",
               shape = 21, size = 1.5,
               position = position_dodge(width = 0.9)) +
  # geom_jitter(shape=10, position=position_jitter(0.2)) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1))
# guides(fill = "none")
Convert everything to factors before modeling. Yes.
# Mixed-effects model: log RT ~ Task with random intercepts for
# participant (ID.true) and word. `center` is defined elsewhere --
# presumably the cleaned data with centered predictors; confirm.
# Reference level is Animacy (output below shows TaskConcrete slower and
# TaskValence faster than the intercept).
m = lmer(LogReactionTime ~ Task + (1|ID.true) + (1|Word), data=center)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ Task + (1 | ID.true) + (1 | Word)
## Data: center
##
## REML criterion at convergence: 9878.4
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.4568 -0.6512 -0.1716 0.4472 5.3564
##
## Random effects:
## Groups Name Variance Std.Dev.
## Word (Intercept) 0.002692 0.05189
## ID.true (Intercept) 0.032749 0.18097
## Residual 0.089738 0.29956
## Number of obs: 21423, groups: Word, 226; ID.true, 95
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.672e+00 2.086e-02 1.450e+02 319.815 < 2e-16 ***
## TaskConcrete 9.314e-02 1.032e-02 2.105e+04 9.021 < 2e-16 ***
## TaskValence -4.412e-02 9.279e-03 2.116e+04 -4.755 1.99e-06 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) TskCnc
## TaskConcret -0.399
## TaskValence -0.402 0.897
# Cell counts per syntactic category, then log RT ~ Category with the same
# random-intercept structure. Neither Category contrast reaches
# significance (output below).
table(center$Category)
## 
## Adjs Nouns Verbs 
## 5340 11815 4268
m = lmer(LogReactionTime ~ Category + (1|ID.true) + (1|Word), data=center)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ Category + (1 | ID.true) + (1 | Word)
## Data: center
##
## REML criterion at convergence: 10771.5
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.1545 -0.6492 -0.1881 0.4500 5.2729
##
## Random effects:
## Groups Name Variance Std.Dev.
## Word (Intercept) 0.002583 0.05082
## ID.true (Intercept) 0.033085 0.18189
## Residual 0.093664 0.30605
## Number of obs: 21423, groups: Word, 226; ID.true, 95
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.68979 0.03781 96.26099 176.917 <2e-16 ***
## CategoryNouns -0.02990 0.04579 96.91688 -0.653 0.515
## CategoryVerbs 0.06257 0.05702 97.15462 1.097 0.275
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) CtgryN
## CategoryNns -0.826
## CategryVrbs -0.663 0.549
# Task x Category interaction model. The design is not fully crossed (the
# Animacy task appears only with Nouns in the cell summary earlier), so
# the fixed-effect matrix is rank deficient and lmer drops 2 interaction
# coefficients (see fit warning below).
m = lmer(LogReactionTime ~ Task*Category + (1|ID.true) + (1|Word), data=center)
## fixed-effect model matrix is rank deficient so dropping 2 columns / coefficients
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ Task * Category + (1 | ID.true) + (1 | Word)
## Data: center
##
## REML criterion at convergence: 9808.1
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.4766 -0.6492 -0.1704 0.4463 5.3271
##
## Random effects:
## Groups Name Variance Std.Dev.
## Word (Intercept) 0.002679 0.05175
## ID.true (Intercept) 0.032848 0.18124
## Residual 0.089362 0.29893
## Number of obs: 21423, groups: Word, 226; ID.true, 95
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.642e+00 3.901e-02 1.106e+02 170.271 < 2e-16
## TaskConcrete 1.513e-01 1.239e-02 2.127e+04 12.204 < 2e-16
## TaskValence -4.425e-02 9.268e-03 2.121e+04 -4.774 1.82e-06
## CategoryNouns 2.510e-02 4.589e-02 9.925e+01 0.547 0.586
## CategoryVerbs 8.471e-02 5.714e-02 9.951e+01 1.482 0.141
## TaskConcrete:CategoryNouns -1.018e-01 1.070e-02 2.124e+04 -9.519 < 2e-16
## TaskConcrete:CategoryVerbs -5.250e-02 1.231e-02 2.116e+04 -4.264 2.02e-05
##
## (Intercept) ***
## TaskConcrete ***
## TaskValence ***
## CategoryNouns
## CategoryVerbs
## TaskConcrete:CategoryNouns ***
## TaskConcrete:CategoryVerbs ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr) TskCnc TskVln CtgryN CtgryV TsC:CN
## TaskConcret -0.244
## TaskValence -0.238 0.748
## CategoryNns -0.810 0.083 0.035
## CategryVrbs -0.644 0.045 0.000 0.549
## TskCncrt:CN 0.078 -0.513 -0.003 -0.096 -0.052
## TskCncrt:CV 0.067 -0.444 0.000 -0.057 -0.104 0.514
## fit warnings:
## fixed-effect model matrix is rank deficient so dropping 2 columns / coefficients
# Inspect df_factors: the all-factor version of the data. Note from the
# output below that Accuracy has a single level ("1") here, and RT/LogRT
# were coerced to factors as well.
str(df_factors)
## tibble [21,423 × 23] (S3: tbl_df/tbl/data.frame)
## $ ...1 : Factor w/ 5014 levels "1","2","3","4",..: 1 3 4 5 6 7 8 9 10 11 ...
## $ ID.true : Factor w/ 95 levels "55132578fdf99b1b0568bdb8",..: 13 13 13 13 13 13 13 13 13 13 ...
## $ Word : Factor w/ 226 levels "admired","annoy",..: 129 176 2 22 182 11 144 199 119 217 ...
## $ Label : Factor w/ 3 levels "test_ani","test_conc",..: 3 3 3 3 3 3 3 3 3 3 ...
## $ ConcValCombo : Factor w/ 4 levels "abstract-negative",..: 1 4 1 4 1 1 3 3 2 3 ...
## $ Task : Factor w/ 3 levels "Animacy","Concrete",..: 3 3 3 3 3 3 3 3 3 3 ...
## $ BlockOrder : Factor w/ 2 levels "CV","VC": 2 2 2 2 2 2 2 2 2 2 ...
## $ Group : Factor w/ 6 levels "A","abstract;concrete",..: 3 3 3 3 3 3 3 3 3 3 ...
## $ Response : Factor w/ 6 levels "abstract","animate",..: 5 6 5 6 5 5 5 5 6 5 ...
## $ Accuracy : Factor w/ 1 level "1": 1 1 1 1 1 1 1 1 1 1 ...
## $ EventTime : Factor w/ 21360 levels "1732203168805",..: 3704 3705 3706 3707 3708 3709 3710 3711 3712 3713 ...
## $ Value : Factor w/ 6 levels "abstract","animate",..: 5 6 5 6 5 5 5 5 6 5 ...
## $ RT : Factor w/ 2641 levels "1231.66666666667",..: 1827 2641 2151 2589 2246 2394 2121 2109 1798 1831 ...
## $ ReactionTime : num [1:21423] 1164 1184 1660 1157 1264 ...
## $ Key_value_F : Factor w/ 6 levels "A","abstract",..: 5 5 5 5 5 5 5 5 5 5 ...
## $ Key_value_J : Factor w/ 6 levels "A","animate;inanimate",..: 3 3 3 3 3 3 3 3 3 3 ...
## $ Comments : Factor w/ 0 levels: NA NA NA NA NA NA NA NA NA NA ...
## $ LogReactionTime: num [1:21423] 7.06 7.08 7.41 7.05 7.14 ...
## $ LogRT : Factor w/ 2641 levels "7.11612354471419",..: 1827 2641 2151 2589 2246 2394 2121 2109 1798 1831 ...
## $ TrialNumber : Factor w/ 252 levels "1","2","3","4",..: 1 3 4 5 6 7 8 9 10 11 ...
## $ Category : Factor w/ 3 levels "Adjs","Nouns",..: 3 3 3 3 3 3 3 3 3 3 ...
## $ Animacy : Factor w/ 2 levels "animate","inanimate": NA NA NA NA NA NA NA NA NA NA ...
## $ Valence : Factor w/ 2 levels "negative","positive": NA NA NA NA NA NA NA NA NA NA ...
# Concrete-task subset with centered predictors, then log RT ~
# cConcValCombo with maximal random slopes.
# Response.n: "abstract" = 0, "concrete" = 1.
# NOTE(review): cConcValCombo centers as.numeric() of a 4-level factor,
# i.e. an ordinal 1-4 coding of a categorical variable -- confirm this is
# intended rather than treatment/sum contrasts.
conc <- df_factors %>%
filter(Task == "Concrete") %>%
mutate(Response.n = as.numeric(factor(Response, levels = c("abstract", "concrete"))) - 1,
# Concrete: the concreteness half of the combined ConcValCombo label
Concrete = ifelse(grepl("abstract", ConcValCombo), "abstract",
ifelse(grepl("concrete", ConcValCombo), "concrete", NA)),
cConcValCombo = as.numeric(ConcValCombo) - mean(as.numeric(ConcValCombo)),
cConcrete = as.numeric(as.factor(Concrete)) - mean(as.numeric(as.factor(Concrete))),
# cSyntactic = as.numeric(factor(Syntactic)) - mean(as.numeric(factor(Syntactic)))
)
m = lmer(LogReactionTime ~ cConcValCombo + (1+cConcValCombo|ID.true) + (1+cConcValCombo|Word), data=conc)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ cConcValCombo + (1 + cConcValCombo | ID.true) +
## (1 + cConcValCombo | Word)
## Data: conc
##
## REML criterion at convergence: 4613.8
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.6323 -0.6449 -0.1766 0.4766 4.9365
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## Word (Intercept) 0.002498 0.04998
## cConcValCombo 0.002526 0.05026 0.82
## ID.true (Intercept) 0.042551 0.20628
## cConcValCombo 0.002551 0.05051 -0.15
## Residual 0.094262 0.30702
## Number of obs: 8372, groups: Word, 196; ID.true, 77
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.790e+00 2.426e-02 8.224e+01 279.860 <2e-16 ***
## cConcValCombo -7.517e-04 8.212e-03 1.215e+02 -0.092 0.927
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## cConcValCmb -0.019
No significant effect of word concreteness (coded via the combined ConcValCombo variable) on log reaction time.
# Log RT ~ centered word concreteness, with a by-participant random slope
# for concreteness. The effect is negative but not significant (p = .203
# in the output below).
m = lmer(LogReactionTime ~ cConcrete + (1+cConcrete|ID.true) + (1|Word), data=conc)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ cConcrete + (1 + cConcrete | ID.true) + (1 |
## Word)
## Data: conc
##
## REML criterion at convergence: 4591.7
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.6472 -0.6501 -0.1797 0.4772 5.0350
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## Word (Intercept) 0.004929 0.07021
## ID.true (Intercept) 0.042413 0.20594
## cConcrete 0.014480 0.12033 -0.14
## Residual 0.093722 0.30614
## Number of obs: 8372, groups: Word, 196; ID.true, 77
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.78054 0.02427 82.89093 279.43 <2e-16 ***
## cConcrete -0.02351 0.01836 126.50710 -1.28 0.203
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## cConcrete -0.104
There is a small, marginal effect of response choice (see the model below).
# Log RT ~ response choice (0 = "abstract", 1 = "concrete" response),
# random intercepts only. Marginal negative effect (p = .090 below):
# "concrete" responses tend to be slightly faster.
m = lmer(LogReactionTime ~ Response.n + (1|ID.true) + (1|Word), data=conc)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ Response.n + (1 | ID.true) + (1 | Word)
## Data: conc
##
## REML criterion at convergence: 4749.9
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.4064 -0.6558 -0.1794 0.4899 4.9799
##
## Random effects:
## Groups Name Variance Std.Dev.
## Word (Intercept) 0.005499 0.07416
## ID.true (Intercept) 0.042090 0.20516
## Residual 0.096810 0.31114
## Number of obs: 8372, groups: Word, 196; ID.true, 77
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.78847 0.02502 94.43954 271.289 <2e-16 ***
## Response.n -0.02150 0.01262 185.60793 -1.704 0.0901 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## Response.n -0.247
# Valence-task subset: keep only trials with a known concreteness-valence
# label, then build numeric predictors for the mixed models below.
val <- df.outliers.removed %>%
  filter(Task == "Valence", !is.na(ConcValCombo)) %>%
  mutate(
    # Response coded 0 = "negative", 1 = "positive"
    Response.n = as.numeric(factor(Response, levels = c("negative", "positive"))) - 1,
    # Valence half of the concreteness-valence label (e.g. "abstract-negative")
    Valence = case_when(
      grepl("negative", ConcValCombo) ~ "negative",
      grepl("positive", ConcValCombo) ~ "positive",
      TRUE ~ NA_character_
    )
  ) %>%
  mutate(
    # Mean-centered numeric codes of the factors, for use as predictors
    cConcValCombo = as.numeric(as.factor(ConcValCombo)) -
      mean(as.numeric(as.factor(ConcValCombo))),
    cValence = as.numeric(as.factor(Valence)) -
      mean(as.numeric(as.factor(Valence)))
  )
# Sanity checks on the valence subset before fitting models.
sum(is.na(val$ConcValCombo)) # Count missing values
## [1] 0
sum(is.na(val$LogReactionTime)) # Check for missing LogReactionTime
## [1] 0
# Variance of the centered predictor; nonzero means it varies and can be
# used as a fixed effect.
var(val$cConcValCombo)
## [1] 1.246584
# Confirm the four expected concreteness-valence cells are present.
unique(val$ConcValCombo)
## [1] "abstract-negative" "concrete-positive" "concrete-negative"
## [4] "abstract-positive"
# View(val)
# Exploratory check for rows with missing ConcValCombo (none remain):
# valna <- val %>%
# filter(is.na(val$ConcValCombo))
# Maximal model: (centered) concreteness-valence combination with random
# slopes by both participant and word. Fit is singular (see message below):
# the by-word slope variance is essentially zero with a -1.00 correlation,
# so a simplified random-effects structure would be justified.
# `<-` used for assignment per R convention (was `=`).
m <- lmer(LogReactionTime ~ cConcValCombo +
            (1 + cConcValCombo | ID.true) + (1 + cConcValCombo | Word),
          data = val)
## boundary (singular) fit: see help('isSingular')
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ cConcValCombo + (1 + cConcValCombo | ID.true) +
## (1 + cConcValCombo | Word)
## Data: val
##
## REML criterion at convergence: 1853
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.0824 -0.6201 -0.1528 0.4390 5.7738
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## Word (Intercept) 3.730e-03 0.061070
## cConcValCombo 1.093e-06 0.001046 -1.00
## ID.true (Intercept) 4.138e-02 0.203412
## cConcValCombo 3.431e-04 0.018524 0.05
## Residual 6.725e-02 0.259324
## Number of obs: 8907, groups: Word, 196; ID.true, 77
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.638771 0.023771 81.083818 279.280 <2e-16 ***
## cConcValCombo -0.005728 0.005051 167.525286 -1.134 0.258
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## cConcValCmb 0.016
## optimizer (nloptwrap) convergence code: 0 (OK)
## boundary (singular) fit: see help('isSingular')
Effect of word valence
# Effect of (centered) word valence on log RT, with by-participant random
# slopes for valence and by-word random intercepts.
# `<-` used for assignment per R convention (was `=`).
m <- lmer(LogReactionTime ~ cValence + (1 + cValence | ID.true) + (1 | Word),
          data = val)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ cValence + (1 + cValence | ID.true) + (1 |
## Word)
## Data: val
##
## REML criterion at convergence: 1809.5
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.2369 -0.6191 -0.1465 0.4441 5.8113
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## Word (Intercept) 0.003671 0.06058
## ID.true (Intercept) 0.041438 0.20356
## cValence 0.003828 0.06187 0.04
## Residual 0.066700 0.25826
## Number of obs: 8907, groups: Word, 196; ID.true, 77
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.63902 0.02378 81.01248 279.187 <2e-16 ***
## cValence -0.02925 0.01254 160.56427 -2.332 0.0209 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## cValence 0.020
Effect of response choice
# Does response choice (0 = negative, 1 = positive) predict log RT in the
# valence task? Intercept-only random effects.
# `<-` used for assignment per R convention (was `=`).
m <- lmer(LogReactionTime ~ Response.n + (1 | ID.true) + (1 | Word),
          data = val)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ Response.n + (1 | ID.true) + (1 | Word)
## Data: val
##
## REML criterion at convergence: 1858.1
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -3.0661 -0.6212 -0.1492 0.4456 5.7690
##
## Random effects:
## Groups Name Variance Std.Dev.
## Word (Intercept) 0.003586 0.05988
## ID.true (Intercept) 0.041369 0.20339
## Residual 0.067655 0.26011
## Number of obs: 8907, groups: Word, 196; ID.true, 77
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.65332 0.02430 88.34958 273.812 < 2e-16 ***
## Response.n -0.02941 0.01026 184.41563 -2.866 0.00464 **
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## Response.n -0.211
# Animacy-task subset with numeric predictors for the mixed models below.
ani <- df_factors %>%
  filter(Task == "Animacy") %>%
  mutate(
    # Response coded 0 = "inanimate", 1 = "animate"
    Response.n = as.numeric(factor(Response, levels = c("inanimate", "animate"))) - 1
  ) %>%
  mutate(
    # Mean-centered numeric code of the Animacy factor
    cAnimacy = as.numeric(Animacy) - mean(as.numeric(Animacy))
  )
Effect of word animacy
# Effect of (centered) word animacy on log RT, with by-participant random
# slopes for animacy and by-word random intercepts.
# `<-` used for assignment per R convention (was `=`).
m <- lmer(LogReactionTime ~ cAnimacy + (1 + cAnimacy | ID.true) + (1 | Word),
          data = ani)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ cAnimacy + (1 + cAnimacy | ID.true) + (1 |
## Word)
## Data: ani
##
## REML criterion at convergence: 1148.7
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.3632 -0.6116 -0.1985 0.4071 5.1028
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## Word (Intercept) 0.007883 0.08879
## ID.true (Intercept) 0.021658 0.14717
## cAnimacy 0.006590 0.08118 0.84
## Residual 0.095076 0.30834
## Number of obs: 2079, groups: Word, 40; ID.true, 18
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.62994 0.03804 22.39932 174.283 <2e-16 ***
## cAnimacy 0.09208 0.03662 40.94232 2.514 0.016 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## cAnimacy 0.408
No effect of response choice
# Does response choice (0 = inanimate, 1 = animate) predict log RT in the
# animacy task? Intercept-only random effects.
# `<-` used for assignment per R convention (was `=`).
m <- lmer(LogReactionTime ~ Response.n + (1 | ID.true) + (1 | Word),
          data = ani)
summary(m)
## Linear mixed model fit by REML. t-tests use Satterthwaite's method [
## lmerModLmerTest]
## Formula: LogReactionTime ~ Response.n + (1 | ID.true) + (1 | Word)
## Data: ani
##
## REML criterion at convergence: 1181.9
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -2.2693 -0.6231 -0.2037 0.3908 5.0174
##
## Random effects:
## Groups Name Variance Std.Dev.
## Word (Intercept) 0.009672 0.09834
## ID.true (Intercept) 0.021860 0.14785
## Residual 0.096714 0.31099
## Number of obs: 2079, groups: Word, 40; ID.true, 18
##
## Fixed effects:
## Estimate Std. Error df t value Pr(>|t|)
## (Intercept) 6.632630 0.041312 29.609700 160.549 <2e-16 ***
## Response.n -0.008296 0.026878 110.576537 -0.309 0.758
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation of Fixed Effects:
## (Intr)
## Response.n -0.345